Don't reference current->domain->arch.shadow_bitmap in the dirty_bit fault handler.
Instead, copy the pointer into arch_vcpu so the handler can load it with one fewer indirection.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct vcpu, arch._thread.ksp));
DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct vcpu, arch._thread.on_ustack));
- DEFINE(IA64_VCPU_DOMAIN_OFFSET, offsetof (struct vcpu, domain));
DEFINE(IA64_VCPU_HYPERCALL_CONTINUATION_OFS, offsetof (struct vcpu, arch.hypercall_continuation));
DEFINE(IA64_VCPU_FP_PSR_OFFSET, offsetof (struct vcpu, arch.fp_psr));
DEFINE(IA64_VCPU_META_RID_DT_OFFSET, offsetof (struct vcpu, arch.metaphysical_rid_dt));
BLANK();
- DEFINE(IA64_DOMAIN_SHADOW_BITMAP_OFFSET, offsetof (struct domain, arch.shadow_bitmap));
+ DEFINE(IA64_VCPU_SHADOW_BITMAP_OFFSET, offsetof (struct vcpu, arch.shadow_bitmap));
BLANK();
case XEN_DOMCTL_SHADOW_OP_OFF:
if (shadow_mode_enabled (d)) {
u64 *bm = d->arch.shadow_bitmap;
+ struct vcpu *v;
+
+ for_each_vcpu(d, v)
+ v->arch.shadow_bitmap = NULL;
/* Flush vhpt and tlb to restore dirty bit usage. */
domain_flush_tlb_vhpt(d);
rc = -ENOMEM;
}
else {
+ struct vcpu *v;
memset(d->arch.shadow_bitmap, 0,
d->arch.shadow_bitmap_size / 8);
-
+
+ for_each_vcpu(d, v)
+ v->arch.shadow_bitmap = d->arch.shadow_bitmap;
			/* Flush vhpt and tlb to enable dirty bit
			   virtualization. */
domain_flush_tlb_vhpt(d);
;;
ld8 r22=[r22]
;;
- add r22=IA64_VCPU_DOMAIN_OFFSET,r22
- ;;
- ld8 r22=[r22] // read domain
- ;;
- add r22=IA64_DOMAIN_SHADOW_BITMAP_OFFSET,r22
+ add r22=IA64_VCPU_SHADOW_BITMAP_OFFSET,r22
;;
ld8 r22=[r22]
;;
unsigned long metaphysical_saved_rr0; // from arch_domain (so is pinned)
unsigned long metaphysical_saved_rr4; // from arch_domain (so is pinned)
unsigned long fp_psr; // used for lazy float register
+ u64 *shadow_bitmap; // from arch_domain (so is pinned)
int breakimm; // from arch_domain (so is pinned)
int starting_rid; /* first RID assigned to domain */
int ending_rid; /* one beyond highest RID assigned to domain */